Only intercept #DB/#BP exceptions if a debugger is attached to the domain.
Signed-off-by: Keir Fraser <keir@xensource.com>
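
The mechanism in the diff below: the #DB/#BP exception intercepts are no longer set unconditionally in the exception bitmap; instead the new per-domain debugger_attached flag is latched per vcpu in debug_state_latch on the resume path, so the intercept bitmap is rewritten only when a debugger attaches or detaches. A minimal sketch of that latch logic, using the names this patch introduces (the helper function itself is hypothetical, for illustration only):

    /* Sketch only: the real logic lives inline in svm_do_resume()/vmx_do_resume(). */
    static void update_debug_intercepts(struct vcpu *v, uint32_t *intercepts)
    {
        bool_t debug_state = v->domain->debugger_attached;
        uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3);

        if ( likely(v->arch.hvm_vcpu.debug_state_latch == debug_state) )
            return;                     /* no attach/detach since last resume */

        v->arch.hvm_vcpu.debug_state_latch = debug_state;
        if ( debug_state )
            *intercepts |= mask;        /* debugger attached: trap #DB and #BP */
        else
            *intercepts &= ~mask;       /* detached: leave them to the guest   */
    }
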
*/
if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
{
- v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
+ v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
vmcb->cr0 |= X86_CR0_TS;
}
}
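
Throughout the patch, the VMX-specific EXCEPTION_BITMAP_* masks (removed near the bottom of this diff) are replaced by shifts of the generic TRAP_* vector numbers; the bits are identical because both bitmaps are indexed by x86 exception vector. A standalone sanity check of that equivalence (the TRAP_* values below mirror Xen's asm-x86 definitions and are reproduced here only so the check compiles on its own):

    #include <assert.h>

    #define TRAP_debug       1   /* #DB */
    #define TRAP_int3        3   /* #BP */
    #define TRAP_no_device   7   /* #NM */
    #define TRAP_page_fault 14   /* #PF */

    int main(void)
    {
        assert((1U << TRAP_debug)      == 0x0002);   /* EXCEPTION_BITMAP_DB */
        assert((1U << TRAP_int3)       == 0x0008);   /* EXCEPTION_BITMAP_BP */
        assert((1U << TRAP_no_device)  == 0x0080);   /* EXCEPTION_BITMAP_NM */
        assert((1U << TRAP_page_fault) == 0x4000);   /* EXCEPTION_BITMAP_PG */
        return 0;
    }
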
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- ASSERT(vmcb);
-
ctxt->eax = vmcb->rax;
ctxt->ss = vmcb->ss.sel;
ctxt->esp = vmcb->rsp;
ctxt->ds = vmcb->ds.sel;
}
-static void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
+static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- regs->eip = vmcb->rip;
- regs->esp = vmcb->rsp;
- regs->eflags = vmcb->rflags;
- regs->cs = vmcb->cs.sel;
- regs->ds = vmcb->ds.sel;
- regs->es = vmcb->es.sel;
- regs->ss = vmcb->ss.sel;
-}
-
-/* XXX Use svm_load_cpu_guest_regs instead */
-static void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
- /* Write the guest register value into VMCB */
vmcb->rax = regs->eax;
vmcb->ss.sel = regs->ss;
vmcb->rsp = regs->esp;
vmcb->rflags = regs->eflags | 2UL;
vmcb->cs.sel = regs->cs;
vmcb->rip = regs->eip;
- if (regs->eflags & EF_TF)
- *intercepts |= EXCEPTION_BITMAP_DB;
- else
- *intercepts &= ~EXCEPTION_BITMAP_DB;
-}
-
-static void svm_load_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs)
-{
- svm_load_cpu_user_regs(v, regs);
}
static void svm_ctxt_switch_from(struct vcpu *v)
svm_restore_dr(v);
}
-static void arch_svm_do_resume(struct vcpu *v)
+static void svm_do_resume(struct vcpu *v)
{
+ bool_t debug_state = v->domain->debugger_attached;
+
+ if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
+ {
+ uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3);
+ v->arch.hvm_vcpu.debug_state_latch = debug_state;
+ if ( debug_state )
+ v->arch.hvm_svm.vmcb->exception_intercepts |= mask;
+ else
+ v->arch.hvm_svm.vmcb->exception_intercepts &= ~mask;
+ }
+
if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
{
v->arch.hvm_svm.launch_core = smp_processor_id();
{
int rc;
- v->arch.schedule_tail = arch_svm_do_resume;
+ v->arch.schedule_tail = svm_do_resume;
v->arch.ctxt_switch_from = svm_ctxt_switch_from;
v->arch.ctxt_switch_to = svm_ctxt_switch_to;
struct vcpu *v = current;
setup_fpu(v);
- vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
+ vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
vmcb->cr0 &= ~X86_CR0_TS;
}
-
-static void svm_do_general_protection_fault(struct vcpu *v,
- struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- unsigned long eip, error_code;
-
- ASSERT(vmcb);
-
- eip = vmcb->rip;
- error_code = vmcb->exitinfo1;
-
- if (vmcb->idtr.limit == 0) {
- printk("Huh? We got a GP Fault with an invalid IDTR!\n");
- svm_dump_vmcb(__func__, vmcb);
- svm_dump_regs(__func__, regs);
- svm_dump_inst(svm_rip2pointer(v));
- domain_crash(v->domain);
- return;
- }
-
- HVM_DBG_LOG(DBG_LEVEL_1,
- "svm_general_protection_fault: eip = %lx, erro_code = %lx",
- eip, error_code);
-
- HVM_DBG_LOG(DBG_LEVEL_1,
- "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
- (unsigned long)regs->eax, (unsigned long)regs->ebx,
- (unsigned long)regs->ecx, (unsigned long)regs->edx,
- (unsigned long)regs->esi, (unsigned long)regs->edi);
-
- /* Reflect it back into the guest */
- svm_inject_exception(v, TRAP_gp_fault, 1, error_code);
-}
-
/* Reserved bits ECX: [31:14], [12:4], [2:1]*/
#define SVM_VCPU_CPUID_L1_ECX_RESERVED 0xffffdff6
/* Reserved bits EDX: [31:29], [27], [22:20], [18], [10] */
/* TS cleared? Then initialise FPU now. */
if ( !(value & X86_CR0_TS) ) {
setup_fpu(v);
- vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
+ vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
}
paging_update_paging_modes(v);
if ( !(value & X86_CR0_TS) )
{
setup_fpu(v);
- vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
+ vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
}
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
case INSTR_CLTS:
/* TS being cleared means that it's time to restore fpu state. */
setup_fpu(current);
- vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
+ vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
break;
__update_guest_eip(vmcb, inst_len);
}
-
static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
{
__update_guest_eip(vmcb, 1);
hvm_hlt(vmcb->rflags);
}
-
static void svm_vmexit_do_invd(struct vcpu *v)
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
__update_guest_eip(vmcb, inst_len);
}
-
-
-
-#ifdef XEN_DEBUGGER
-static void svm_debug_save_cpu_user_regs(struct vmcb_struct *vmcb,
- struct cpu_user_regs *regs)
-{
- regs->eip = vmcb->rip;
- regs->esp = vmcb->rsp;
- regs->eflags = vmcb->rflags;
-
- regs->xcs = vmcb->cs.sel;
- regs->xds = vmcb->ds.sel;
- regs->xes = vmcb->es.sel;
- regs->xfs = vmcb->fs.sel;
- regs->xgs = vmcb->gs.sel;
- regs->xss = vmcb->ss.sel;
-}
-
-
-static void svm_debug_restore_cpu_user_regs(struct cpu_user_regs *regs)
-{
- vmcb->ss.sel = regs->xss;
- vmcb->rsp = regs->esp;
- vmcb->rflags = regs->eflags;
- vmcb->cs.sel = regs->xcs;
- vmcb->rip = regs->eip;
-
- vmcb->gs.sel = regs->xgs;
- vmcb->fs.sel = regs->xfs;
- vmcb->es.sel = regs->xes;
- vmcb->ds.sel = regs->xds;
-}
-#endif
-
-
void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
{
struct vcpu *v = current;
switch (exit_reason)
{
- case VMEXIT_EXCEPTION_DB:
- {
-#ifdef XEN_DEBUGGER
- svm_debug_save_cpu_user_regs(regs);
- pdb_handle_exception(1, regs, 1);
- svm_debug_restore_cpu_user_regs(regs);
-#else
- svm_store_cpu_user_regs(regs, v);
- domain_pause_for_debugger();
-#endif
- }
- break;
-
case VMEXIT_INTR:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
HVMTRACE_0D(INTR, v);
break;
+
case VMEXIT_NMI:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
HVMTRACE_0D(NMI, v);
break;
+
case VMEXIT_SMI:
/* Asynchronous event, handled when we STGI'd after the VMEXIT. */
HVMTRACE_0D(SMI, v);
break;
- case VMEXIT_INIT:
- BUG(); /* unreachable */
+ case VMEXIT_EXCEPTION_DB:
+ if ( v->domain->debugger_attached )
+ domain_pause_for_debugger();
+ else
+ svm_inject_exception(v, TRAP_debug, 0, 0);
+ break;
case VMEXIT_EXCEPTION_BP:
-#ifdef XEN_DEBUGGER
- svm_debug_save_cpu_user_regs(regs);
- pdb_handle_exception(3, regs, 1);
- svm_debug_restore_cpu_user_regs(regs);
-#else
- if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
+ if ( v->domain->debugger_attached )
domain_pause_for_debugger();
else
svm_inject_exception(v, TRAP_int3, 0, 0);
-#endif
break;
case VMEXIT_EXCEPTION_NM:
svm_do_no_device_fault(vmcb);
break;
- case VMEXIT_EXCEPTION_GP:
- /* This should probably not be trapped in the future */
- regs->error_code = vmcb->exitinfo1;
- svm_do_general_protection_fault(v, regs);
- break;
-
- case VMEXIT_EXCEPTION_PF:
- {
+ case VMEXIT_EXCEPTION_PF: {
unsigned long va;
va = vmcb->exitinfo2;
regs->error_code = vmcb->exitinfo1;
break;
}
- case VMEXIT_EXCEPTION_DF:
- /* Debug info to hopefully help debug WHY the guest double-faulted. */
- svm_dump_vmcb(__func__, vmcb);
- svm_dump_regs(__func__, regs);
- svm_dump_inst(svm_rip2pointer(v));
- svm_inject_exception(v, TRAP_double_fault, 1, 0);
- break;
-
case VMEXIT_VINTR:
vmcb->vintr.fields.irq = 0;
vmcb->general1_intercepts &= ~GENERAL1_INTERCEPT_VINTR;
paging_update_paging_modes(v);
vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3;
- arch_svm->vmcb->exception_intercepts = MONITOR_DEFAULT_EXCEPTION_BITMAP;
-
if ( paging_mode_hap(v->domain) )
{
vmcb->cr0 = arch_svm->cpu_shadow_cr0;
vmcb->np_enable = 1; /* enable nested paging */
vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
- vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_PG;
vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
vmcb->cr4 = arch_svm->cpu_shadow_cr4 = 0;
}
+ else
+ {
+ vmcb->exception_intercepts = 1U << TRAP_page_fault;
+ }
return 0;
}
if ( v == current )
return;
- /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
+ /* Don't confuse vmx_do_resume (for @v or @current!) */
vmx_clear_vmcs(v);
if ( is_hvm_vcpu(current) )
vmx_load_vmcs(current);
__vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
#endif
- __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
+ __vmwrite(EXCEPTION_BITMAP, 1U << TRAP_page_fault);
/* Guest CR0. */
cr0 = read_cr0();
domain_crash_synchronous();
}
-void arch_vmx_do_resume(struct vcpu *v)
+void vmx_do_resume(struct vcpu *v)
{
+ bool_t debug_state;
+
if ( v->arch.hvm_vmx.active_cpu == smp_processor_id() )
{
vmx_load_vmcs(v);
vmx_set_host_env(v);
}
+ debug_state = v->domain->debugger_attached;
+ if ( unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
+ {
+ unsigned long intercepts = __vmread(EXCEPTION_BITMAP);
+ unsigned long mask = (1U << TRAP_debug) | (1U << TRAP_int3);
+ v->arch.hvm_vcpu.debug_state_latch = debug_state;
+ if ( debug_state )
+ intercepts |= mask;
+ else
+ intercepts &= ~mask;
+ __vmwrite(EXCEPTION_BITMAP, intercepts);
+ }
+
hvm_do_resume(v);
reset_stack_and_jump(vmx_asm_do_vmentry);
}
spin_lock_init(&v->arch.hvm_vmx.vmcs_lock);
- v->arch.schedule_tail = arch_vmx_do_resume;
+ v->arch.schedule_tail = vmx_do_resume;
v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
/* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */
__vmwrite(GUEST_RFLAGS, regs->eflags | 2UL);
- if ( regs->eflags & EF_TF )
- __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
- else
- __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
-
if ( regs->eflags & EF_VM )
{
/*
{
v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
__vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
- __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
+ __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
}
}
struct vcpu *v = current;
setup_fpu(current);
- __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
+ __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
/* Disable TS in guest CR0 unless the guest wants the exception too. */
if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
if ( !(value & X86_CR0_TS) )
{
setup_fpu(v);
- __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
+ __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
}
v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG
mov_from_cr(cr, gp, regs);
break;
case TYPE_CLTS:
-// TRACE_VMEXIT(1, TYPE_CLTS);
-
/* We initialise the FPU now, to avoid needing another vmexit. */
setup_fpu(v);
- __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
+ __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
__vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
value = v->arch.hvm_vmx.cpu_shadow_cr0;
value = (value & ~0xF) |
(((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
-// TRACE_VMEXIT(1, TYPE_LMSW);
-// TRACE_VMEXIT(2, value);
return vmx_set_cr0(value);
- break;
default:
BUG();
}
}
}
-#if defined (__x86_64__)
-void store_cpu_user_regs(struct cpu_user_regs *regs)
-{
- regs->ss = __vmread(GUEST_SS_SELECTOR);
- regs->rsp = __vmread(GUEST_RSP);
- regs->rflags = __vmread(GUEST_RFLAGS);
- regs->cs = __vmread(GUEST_CS_SELECTOR);
- regs->ds = __vmread(GUEST_DS_SELECTOR);
- regs->es = __vmread(GUEST_ES_SELECTOR);
- regs->rip = __vmread(GUEST_RIP);
-}
-#elif defined (__i386__)
-void store_cpu_user_regs(struct cpu_user_regs *regs)
-{
- regs->ss = __vmread(GUEST_SS_SELECTOR);
- regs->esp = __vmread(GUEST_RSP);
- regs->eflags = __vmread(GUEST_RFLAGS);
- regs->cs = __vmread(GUEST_CS_SELECTOR);
- regs->ds = __vmread(GUEST_DS_SELECTOR);
- regs->es = __vmread(GUEST_ES_SELECTOR);
- regs->eip = __vmread(GUEST_RIP);
-}
-#endif
-
-#ifdef XEN_DEBUGGER
-void save_cpu_user_regs(struct cpu_user_regs *regs)
-{
- regs->xss = __vmread(GUEST_SS_SELECTOR);
- regs->esp = __vmread(GUEST_RSP);
- regs->eflags = __vmread(GUEST_RFLAGS);
- regs->xcs = __vmread(GUEST_CS_SELECTOR);
- regs->eip = __vmread(GUEST_RIP);
-
- regs->xgs = __vmread(GUEST_GS_SELECTOR);
- regs->xfs = __vmread(GUEST_FS_SELECTOR);
- regs->xes = __vmread(GUEST_ES_SELECTOR);
- regs->xds = __vmread(GUEST_DS_SELECTOR);
-}
-
-void restore_cpu_user_regs(struct cpu_user_regs *regs)
-{
- __vmwrite(GUEST_SS_SELECTOR, regs->xss);
- __vmwrite(GUEST_RSP, regs->esp);
- __vmwrite(GUEST_RFLAGS, regs->eflags);
- __vmwrite(GUEST_CS_SELECTOR, regs->xcs);
- __vmwrite(GUEST_RIP, regs->eip);
-
- __vmwrite(GUEST_GS_SELECTOR, regs->xgs);
- __vmwrite(GUEST_FS_SELECTOR, regs->xfs);
- __vmwrite(GUEST_ES_SELECTOR, regs->xes);
- __vmwrite(GUEST_DS_SELECTOR, regs->xds);
-}
-#endif
-
static void vmx_reflect_exception(struct vcpu *v)
{
int error_code, intr_info, vector;
switch ( vector )
{
-#ifdef XEN_DEBUGGER
- case TRAP_debug:
- {
- save_cpu_user_regs(regs);
- pdb_handle_exception(1, regs, 1);
- restore_cpu_user_regs(regs);
- break;
- }
- case TRAP_int3:
- {
- save_cpu_user_regs(regs);
- pdb_handle_exception(3, regs, 1);
- restore_cpu_user_regs(regs);
- break;
- }
-#else
case TRAP_debug:
- {
- if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
- {
- store_cpu_user_regs(regs);
+ if ( v->domain->debugger_attached )
domain_pause_for_debugger();
- __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS,
- PENDING_DEBUG_EXC_BS);
- }
else
- {
vmx_reflect_exception(v);
- __vm_clear_bit(GUEST_PENDING_DBG_EXCEPTIONS,
- PENDING_DEBUG_EXC_BS);
- }
-
break;
- }
case TRAP_int3:
- {
- if ( test_bit(_DOMF_debugging, &v->domain->domain_flags) )
+ if ( v->domain->debugger_attached )
domain_pause_for_debugger();
else
vmx_reflect_exception(v);
break;
- }
-#endif
case TRAP_no_device:
- {
vmx_do_no_device_fault();
break;
- }
case TRAP_page_fault:
- {
exit_qualification = __vmread(EXIT_QUALIFICATION);
regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
break;
- }
case TRAP_nmi:
HVMTRACE_0D(NMI, v);
if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
-
void domain_pause_for_debugger(void)
{
struct domain *d = current->domain;
send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}
+__attribute__ ((weak)) void domain_debug_state_changed(struct domain *d) { }
+
/* Complete domain destroy after RCU readers are not holding old references. */
static void complete_domain_destroy(struct rcu_head *head)
{
d = rcu_lock_domain_by_id(op->domain);
if ( d != NULL )
{
- if ( op->u.setdebugging.enable )
- set_bit(_DOMF_debugging, &d->domain_flags);
- else
- clear_bit(_DOMF_debugging, &d->domain_flags);
+ d->debugger_attached = !!op->u.setdebugging.enable;
rcu_unlock_domain(d);
ret = 0;
}
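
With the handler above, attaching or detaching a debugger is a single domctl that flips d->debugger_attached; each vcpu picks the change up on its next resume via the debug_state_latch comparison. A sketch of the dom0 caller side, assuming a libxc-style wrapper named xc_domain_setdebugging() (that name and signature are an assumption, not part of this patch):

    #include <stdint.h>

    /* Assumed libxc wrapper around the setdebugging domctl shown above. */
    int xc_domain_setdebugging(int xc_handle, uint32_t domid, unsigned int enable);

    static int attach_debugger(int xc_handle, uint32_t domid)
    {
        /* enable = 1 sets debugger_attached; pass 0 again to detach. */
        return xc_domain_setdebugging(xc_handle, domid, 1);
    }
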
{
struct vcpu *v = current;
- if ( guest_kernel_mode(v, regs) &&
- test_bit(_DOMF_debugging, &v->domain->domain_flags) &&
+ if ( guest_kernel_mode(v, regs) && v->domain->debugger_attached &&
((vector == TRAP_int3) || (vector == TRAP_debug)) )
{
domain_pause_for_debugger();
#define TYPE_CLTS (2 << 4)
#define TYPE_LMSW (3 << 4)
-enum hval_bitmaps {
- EXCEPTION_BITMAP_TABLE=0,
-};
-
-#define EXCEPTION_BITMAP_DE (1 << 0) /* Divide Error */
-#define EXCEPTION_BITMAP_DB (1 << 1) /* Debug */
-#define EXCEPTION_BITMAP_NMI (1 << 2) /* NMI */
-#define EXCEPTION_BITMAP_BP (1 << 3) /* Breakpoint */
-#define EXCEPTION_BITMAP_OF (1 << 4) /* Overflow */
-#define EXCEPTION_BITMAP_BR (1 << 5) /* BOUND Range Exceeded */
-#define EXCEPTION_BITMAP_UD (1 << 6) /* Invalid Opcode */
-#define EXCEPTION_BITMAP_NM (1 << 7) /* Device Not Available */
-#define EXCEPTION_BITMAP_DF (1 << 8) /* Double Fault */
-/* reserved */
-#define EXCEPTION_BITMAP_TS (1 << 10) /* Invalid TSS */
-#define EXCEPTION_BITMAP_NP (1 << 11) /* Segment Not Present */
-#define EXCEPTION_BITMAP_SS (1 << 12) /* Stack-Segment Fault */
-#define EXCEPTION_BITMAP_GP (1 << 13) /* General Protection */
-#define EXCEPTION_BITMAP_PG (1 << 14) /* Page Fault */
-#define EXCEPTION_BITMAP_MF (1 << 16) /* x87 FPU Floating-Point Error (Math Fault) */
-#define EXCEPTION_BITMAP_AC (1 << 17) /* Alignment Check */
-#define EXCEPTION_BITMAP_MC (1 << 18) /* Machine Check */
-#define EXCEPTION_BITMAP_XF (1 << 19) /* SIMD Floating-Point Exception */
-
-/* Pending Debug exceptions */
-#define PENDING_DEBUG_EXC_BP (1 << 12) /* break point */
-#define PENDING_DEBUG_EXC_BS (1 << 14) /* Single step */
-
-#ifdef XEN_DEBUGGER
-#define MONITOR_DEFAULT_EXCEPTION_BITMAP \
- ( EXCEPTION_BITMAP_PG | \
- EXCEPTION_BITMAP_DB | \
- EXCEPTION_BITMAP_BP | \
- EXCEPTION_BITMAP_GP )
-#else
-#define MONITOR_DEFAULT_EXCEPTION_BITMAP \
- ( EXCEPTION_BITMAP_PG )
-#endif
-
#define VMX_DELIVER_NO_ERROR_CODE -1
#if HVM_DEBUG
int xen_port;
- /* Flags */
- int flag_dr_dirty;
+ bool_t flag_dr_dirty;
+ bool_t debug_state_latch;
union {
struct arch_vmx_struct vmx;
#define vmx_schedule_tail(next) \
(next)->thread.arch_vmx.arch_vmx_schedule_tail((next))
-void vmx_do_resume(struct vcpu *);
-
struct vmcs_struct *vmx_alloc_host_vmcs(void);
void vmx_free_host_vmcs(struct vmcs_struct *vmcs);
#include <asm/i387.h>
#include <asm/hvm/trace.h>
-extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
-extern void vmx_asm_do_vmentry(void);
-extern void vmx_intr_assist(void);
-extern void arch_vmx_do_resume(struct vcpu *);
-extern void set_guest_time(struct vcpu *v, u64 gtime);
+void vmx_asm_vmexit_handler(struct cpu_user_regs);
+void vmx_asm_do_vmentry(void);
+void vmx_intr_assist(void);
+void vmx_do_resume(struct vcpu *);
+void set_guest_time(struct vcpu *v, u64 gtime);
extern unsigned int cpu_rev;
return ecx;
}
-static inline void __vm_set_bit(unsigned long field, unsigned long mask)
+static inline void __vm_set_bit(unsigned long field, unsigned int bit)
{
- __vmwrite(field, __vmread(field) | mask);
+ __vmwrite(field, __vmread(field) | (1UL << bit));
}
-static inline void __vm_clear_bit(unsigned long field, unsigned long mask)
+static inline void __vm_clear_bit(unsigned long field, unsigned int bit)
{
- __vmwrite(field, __vmread(field) & ~mask);
+ __vmwrite(field, __vmread(field) & ~(1UL << bit));
}
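
__vm_set_bit()/__vm_clear_bit() now take a bit index rather than a precomputed mask and do the shift internally, so callers pass the exception vector directly. An illustrative before/after call site, assuming the usual Xen definitions are in scope:

    static void example_enable_nm_intercept(void)
    {
        /* before: __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);    mask 0x80 */
        __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);  /* after: vector 7 -> bit 7 */
    }
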
static inline void __vmxoff (void)
unsigned long domain_flags;
- /* Boolean: Is this an HVM guest? */
- char is_hvm;
-
- /* Boolean: Is this guest fully privileged (aka dom0)? */
- char is_privileged;
+ /* Is this an HVM guest? */
+ bool_t is_hvm;
+ /* Is this guest fully privileged (aka dom0)? */
+ bool_t is_privileged;
+ /* Is this guest being debugged by dom0? */
+ bool_t debugger_attached;
spinlock_t pause_lock;
unsigned int pause_count;
void domain_kill(struct domain *d);
void domain_shutdown(struct domain *d, u8 reason);
void domain_pause_for_debugger(void);
+void domain_debug_state_changed(struct domain *d);
/*
* Mark specified domain as crashed. This function always returns, even if the
/* Domain is paused by controller software. */
#define _DOMF_ctrl_pause 2
#define DOMF_ctrl_pause (1UL<<_DOMF_ctrl_pause)
- /* Domain is being debugged by controller software. */
-#define _DOMF_debugging 3
-#define DOMF_debugging (1UL<<_DOMF_debugging)
/* Are any VCPUs polling event channels (SCHEDOP_poll)? */
-#define _DOMF_polling 4
+#define _DOMF_polling 3
#define DOMF_polling (1UL<<_DOMF_polling)
/* Domain is paused by the hypervisor? */
-#define _DOMF_paused 5
+#define _DOMF_paused 4
#define DOMF_paused (1UL<<_DOMF_paused)
/* Domain is a compatibility one? */
-#define _DOMF_compat 6
+#define _DOMF_compat 5
#define DOMF_compat (1UL<<_DOMF_compat)
static inline int vcpu_runnable(struct vcpu *v)
#define LONG_MIN (-LONG_MAX - 1)
#define ULONG_MAX (~0UL)
+typedef char bool_t;
+
/* bsd */
typedef unsigned char u_char;
typedef unsigned short u_short;